# import libraries
from fastai import *
from fastai.vision.all import *
from sklearn.metrics import confusion_matrix
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import PIL
import PIL.Image
import warnings
warnings.filterwarnings('ignore')
# Location of the dataset on disk (expects 'train' and 'valid' subfolders,
# one subdirectory per class inside each).
path = 'data'

# Dataloader configuration: resize every image to 224x224 at item level,
# no batch-level augmentation, batch size 32 for both splits.
loader_kwargs = dict(
    train='train',
    valid='valid',
    item_tfms=Resize(224),
    batch_tfms=None,
    bs=32,
    val_bs=32,
)
dls = ImageDataLoaders.from_folder(path, **loader_kwargs)

# Sanity check: display 16 sample images from the training set.
dls.train.show_batch(max_n=16)
# Create your transfer-learning model with a ResNet50 backbone.
# `vision_learner` is the current fastai API; `cnn_learner` is its deprecated
# alias. The pretrained backbone starts frozen (only the new classification
# head is trainable), which matches the summary printed below.
learn = vision_learner(dls, resnet50, metrics=accuracy)
learn.summary()
Sequential (Input shape: 32)
============================================================================
Layer (type) Output Shape Param # Trainable
============================================================================
32 x 64 x 112 x 112
Conv2d 9408 False
BatchNorm2d 128 True
ReLU
MaxPool2d
Conv2d 4096 False
BatchNorm2d 128 True
Conv2d 36864 False
BatchNorm2d 128 True
____________________________________________________________________________
32 x 256 x 56 x 56
Conv2d 16384 False
BatchNorm2d 512 True
ReLU
____________________________________________________________________________
32 x 256 x 56 x 56
Conv2d 16384 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 64 x 56 x 56
Conv2d 16384 False
BatchNorm2d 128 True
Conv2d 36864 False
BatchNorm2d 128 True
____________________________________________________________________________
32 x 256 x 56 x 56
Conv2d 16384 False
BatchNorm2d 512 True
ReLU
____________________________________________________________________________
32 x 64 x 56 x 56
Conv2d 16384 False
BatchNorm2d 128 True
Conv2d 36864 False
BatchNorm2d 128 True
____________________________________________________________________________
32 x 256 x 56 x 56
Conv2d 16384 False
BatchNorm2d 512 True
ReLU
____________________________________________________________________________
32 x 128 x 56 x 56
Conv2d 32768 False
BatchNorm2d 256 True
____________________________________________________________________________
32 x 128 x 28 x 28
Conv2d 147456 False
BatchNorm2d 256 True
____________________________________________________________________________
32 x 512 x 28 x 28
Conv2d 65536 False
BatchNorm2d 1024 True
ReLU
____________________________________________________________________________
32 x 512 x 28 x 28
Conv2d 131072 False
BatchNorm2d 1024 True
____________________________________________________________________________
32 x 128 x 28 x 28
Conv2d 65536 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
____________________________________________________________________________
32 x 512 x 28 x 28
Conv2d 65536 False
BatchNorm2d 1024 True
ReLU
____________________________________________________________________________
32 x 128 x 28 x 28
Conv2d 65536 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
____________________________________________________________________________
32 x 512 x 28 x 28
Conv2d 65536 False
BatchNorm2d 1024 True
ReLU
____________________________________________________________________________
32 x 128 x 28 x 28
Conv2d 65536 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
____________________________________________________________________________
32 x 512 x 28 x 28
Conv2d 65536 False
BatchNorm2d 1024 True
ReLU
____________________________________________________________________________
32 x 256 x 28 x 28
Conv2d 131072 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 524288 False
BatchNorm2d 2048 True
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 262144 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 262144 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 262144 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 262144 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 256 x 14 x 14
Conv2d 262144 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
32 x 1024 x 14 x 14
Conv2d 262144 False
BatchNorm2d 2048 True
ReLU
____________________________________________________________________________
32 x 512 x 14 x 14
Conv2d 524288 False
BatchNorm2d 1024 True
____________________________________________________________________________
32 x 512 x 7 x 7
Conv2d 2359296 False
BatchNorm2d 1024 True
____________________________________________________________________________
32 x 2048 x 7 x 7
Conv2d 1048576 False
BatchNorm2d 4096 True
ReLU
____________________________________________________________________________
32 x 2048 x 7 x 7
Conv2d 2097152 False
BatchNorm2d 4096 True
____________________________________________________________________________
32 x 512 x 7 x 7
Conv2d 1048576 False
BatchNorm2d 1024 True
Conv2d 2359296 False
BatchNorm2d 1024 True
____________________________________________________________________________
32 x 2048 x 7 x 7
Conv2d 1048576 False
BatchNorm2d 4096 True
ReLU
____________________________________________________________________________
32 x 512 x 7 x 7
Conv2d 1048576 False
BatchNorm2d 1024 True
Conv2d 2359296 False
BatchNorm2d 1024 True
____________________________________________________________________________
32 x 2048 x 7 x 7
Conv2d 1048576 False
BatchNorm2d 4096 True
ReLU
AdaptiveAvgPool2d
AdaptiveMaxPool2d
Flatten
BatchNorm1d 8192 True
Dropout
____________________________________________________________________________
32 x 512
Linear 2097152 True
ReLU
BatchNorm1d 1024 True
Dropout
____________________________________________________________________________
32 x 2
Linear 1024 True
____________________________________________________________________________
Total params: 25,615,424
Total trainable params: 2,160,512
Total non-trainable params: 23,454,912
Optimizer used: <function Adam at 0x7ff1116ddf70>
Loss function: FlattenedLoss of CrossEntropyLoss()
Model frozen up to parameter group #2
Callbacks:
- TrainEvalCallback
- Recorder
- ProgressCallback
# train the classifier of your ML model
# One-cycle learning-rate schedule for 10 epochs. With the backbone frozen
# (see the summary above: conv layers marked "False"), only the new head
# is updated here.
learn.fit_one_cycle(10)
| epoch | train_loss | valid_loss | accuracy | time |
|---|---|---|---|---|
| 0 | 0.828958 | 0.267547 | 0.880000 | 00:04 |
| 1 | 0.396593 | 0.096239 | 0.970000 | 00:03 |
| 2 | 0.237563 | 0.073978 | 0.990000 | 00:03 |
| 3 | 0.159168 | 0.088994 | 0.980000 | 00:04 |
| 4 | 0.113461 | 0.093329 | 0.990000 | 00:03 |
| 5 | 0.084038 | 0.102305 | 0.980000 | 00:04 |
| 6 | 0.063870 | 0.096275 | 0.990000 | 00:04 |
| 7 | 0.054412 | 0.090011 | 0.990000 | 00:04 |
| 8 | 0.043273 | 0.082855 | 0.990000 | 00:04 |
| 9 | 0.034451 | 0.086828 | 0.990000 | 00:04 |
# Persist the trained learner, then reload it from disk to mimic deployment.
learn.export()
del learn
learn = load_learner('data/export.pkl')

# Run inference on the held-out test folder; labels come from the file paths.
test_files = get_image_files('data/test')
test_dl = learn.dls.test_dl(test_files, with_labels=True)
probs, y_true = learn.get_preds(dl=test_dl)
y_pred = probs.argmax(dim=1)

# Confusion matrix: rows = actual class, columns = predicted class.
cm = confusion_matrix(y_true, y_pred)
cm
array([[25, 0],
[ 0, 25]])
# Calculate sensitivity, specificity, positive predictive value, negative
# predictive value from the 2x2 confusion matrix computed above.
# sklearn layout: rows = actual, columns = predicted; row-major ravel of a
# 2x2 matrix therefore yields TN, FP, FN, TP (class 1 taken as "positive").
TN, FP, FN, TP = cm.ravel()

def _pct(numerator, denominator):
    """Return numerator/denominator as a percentage; 0.0 if denominator is 0.

    Guards against a zero denominator (e.g. a test set with no positives),
    which would otherwise produce nan silently because warnings are filtered.
    """
    return (numerator / denominator) * 100 if denominator else 0.0

sensitivity = _pct(TP, TP + FN)   # true positive rate (recall)
specificity = _pct(TN, TN + FP)   # true negative rate
PPV = _pct(TP, TP + FP)           # precision
NPV = _pct(TN, TN + FN)

print(f'Sensitivity: {sensitivity: .2f}%')
print(f'Specificity: {specificity: .2f}%')
print(f'Positive predictive value:{PPV: .2f}%')
print(f'Negative predictive value:{NPV: .2f}%')
Sensitivity: 100.00% Specificity: 100.00% Positive predictive value: 100.00% Negative predictive value: 100.00%
# Render the confusion matrix as an annotated heatmap.
class_names = ['Colon Adenocarcinoma', 'Lung Adenocarcinoma']
df_cm = pd.DataFrame(cm, columns=class_names, index=class_names)
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'

plt.figure(figsize=(15, 10))
sns.set(font_scale=1.5)  # enlarge axis/tick labels for readability
sns.heatmap(df_cm, cmap="Blues", annot=True, annot_kws={"size": 20})
<AxesSubplot:xlabel='Predicted', ylabel='Actual'>
!pwd
/home/ab/Projects/met_aca
# Make a prediction on a single image from the lung adenocarcinoma test set.
image_path = 'data/test/LungACA/Image038.jpeg'
predicted_label = learn.predict(image_path)[0]  # first element is the class
verdict = (
    'This image most likely belongs to Colon Adenocarcinoma.'
    if predicted_label == 'ColonACA'
    else 'This image most likely belongs to Lung Adenocarcinoma.'
)
print(verdict)
PIL.Image.open(image_path)  # display the image being classified
This image most likely belongs to Lung Adenocarcinoma.
# Make a prediction on a single image from the colon adenocarcinoma test set.
image_path = 'data/test/ColonACA/Image031.jpeg'
predicted_label = learn.predict(image_path)[0]  # first element is the class
messages = {
    'ColonACA': 'This image most likely belongs to Colon Adenocarcinoma.',
}
print(messages.get(predicted_label,
                   'This image most likely belongs to Lung Adenocarcinoma.'))
PIL.Image.open(image_path)  # display the image being classified
This image most likely belongs to Colon Adenocarcinoma.